bimodal blkback: Support multiple ring protocols.
author     kfraser@localhost.localdomain <kfraser@localhost.localdomain>
           Wed, 24 Jan 2007 10:38:17 +0000 (10:38 +0000)
committer  kfraser@localhost.localdomain <kfraser@localhost.localdomain>
           Wed, 24 Jan 2007 10:38:17 +0000 (10:38 +0000)
This is needed for 32-on-64 support.  Right now there are three
protocols: native, x86_32 and x86_64.  The scheme can be extended if
needed.

Interface changes (io/blkif.h)
 * Define the x86_32 and x86_64 structs in addition to the native
   version.
 * Add helper functions to convert these requests to the native
   format.  (The sketch after this list shows why the layouts differ.)

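The reason separate x86_32 and x86_64 request structs are needed is
alignment: the 8-byte "id" field sits at a different offset under each
ABI.  A standalone sketch of the divergence, not part of the patch
(hypothetical struct names; offsets assume gcc on x86):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the i386 ABI: 64-bit fields packed to 4 bytes. */
    #pragma pack(push, 4)
    struct req32 { uint8_t op, nseg; uint16_t handle; uint64_t id; };
    #pragma pack(pop)

    /* Mirrors the x86_64 ABI: "id" naturally aligned to 8 bytes. */
    struct req64 { uint8_t op, nseg; uint16_t handle;
                   uint64_t __attribute__((__aligned__(8))) id; };

    int main(void)
    {
            /* Prints "4 vs 8" with gcc on x86: a 64-bit backend cannot
             * read a 32-bit frontend's ring entries in place. */
            printf("%zu vs %zu\n", offsetof(struct req32, id),
                   offsetof(struct req64, id));
            return 0;
    }
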
Backend changes:
 * Look at the frontend's "protocol" node and switch ring handling
   accordingly.  If the protocol node isn't present, the native
   protocol is assumed.  (A sketch of the frontend side follows this
   list.)
 * As the request struct is copied anyway before being processed (for
   security reasons), it is converted to native at that point, so most
   backend code doesn't need to know which protocol the frontend
   speaks.
 * In the case of blktap this is completely transparent to userspace:
   the kernel/userspace ring is always native, no matter what the
   frontend speaks.
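
The frontend counterpart is not part of this patch; a hedged sketch of
how a frontend could advertise its ABI in xenstore before connecting
(dev being the frontend's struct xenbus_device, XEN_IO_PROTO_ABI_NATIVE
coming from io/protocols.h):

    err = xenbus_printf(XBT_NIL, dev->nodename, "protocol",
                        "%s", XEN_IO_PROTO_ABI_NATIVE);
    if (err)
            xenbus_dev_fatal(dev, err, "writing protocol");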

Signed-off-by: Gerd Hoffmann <kraxel@suse.de>
linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
linux-2.6-xen-sparse/drivers/xen/blkback/common.h
linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
linux-2.6-xen-sparse/drivers/xen/blktap/common.h
linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
linux-2.6-xen-sparse/include/xen/blkif.h [new file with mode: 0644]
xen/include/public/io/blkif.h

diff --git a/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c b/linux-2.6-xen-sparse/drivers/xen/blkback/blkback.c
index e8df9e034648567cb09ee49ddbdd0bb506293da2..f69bed26fe298ce9fc5fb5a2bc089e6f5a1e9279 100644
@@ -298,17 +298,20 @@ irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 
 static int do_block_io_op(blkif_t *blkif)
 {
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;
 
-       rc = blk_ring->req_cons;
-       rp = blk_ring->sring->req_prod;
+       rc = blk_rings->common.req_cons;
+       rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
-       while ((rc != rp) && !RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+       while ((rc != rp)) {
+
+               if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
+                       break;
 
                pending_req = alloc_req();
                if (NULL == pending_req) {
@@ -317,8 +320,20 @@ static int do_block_io_op(blkif_t *blkif)
                        break;
                }
 
-               memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-               blk_ring->req_cons = ++rc; /* before make_response() */
+               switch (blkif->blk_protocol) {
+               case BLKIF_PROTOCOL_NATIVE:
+                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
+                       break;
+               case BLKIF_PROTOCOL_X86_32:
+                       blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+                       break;
+               case BLKIF_PROTOCOL_X86_64:
+                       blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+                       break;
+               default:
+                       BUG();
+               }
+               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
                switch (req.operation) {
                case BLKIF_OP_READ:
@@ -498,34 +513,48 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 static void make_response(blkif_t *blkif, unsigned long id, 
                          unsigned short op, int st)
 {
-       blkif_response_t *resp;
+       blkif_response_t  resp;
        unsigned long     flags;
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;
 
-       spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-
-       /* Place on the response ring for the relevant domain. */ 
-       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-       resp->id        = id;
-       resp->operation = op;
-       resp->status    = st;
-       blk_ring->rsp_prod_pvt++;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
+       resp.id        = id;
+       resp.operation = op;
+       resp.status    = st;
 
-       if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+       spin_lock_irqsave(&blkif->blk_ring_lock, flags);
+       /* Place on the response ring for the relevant domain. */
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+               memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_32:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_64:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       default:
+               BUG();
+       }
+       blk_rings->common.rsp_prod_pvt++;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
-               RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
 
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
-
        }
+
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
 
        if (more_to_do)
diff --git a/linux-2.6-xen-sparse/drivers/xen/blkback/common.h b/linux-2.6-xen-sparse/drivers/xen/blkback/common.h
index 1c51768bb7c585ec7d04f4352a4f133a42702a82..67d5d44a09aee2cfd310ae6a60ae392f0337e18a 100644
@@ -40,8 +40,7 @@
 #include <asm/pgalloc.h>
 #include <xen/evtchn.h>
 #include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include <xen/gnttab.h>
 #include <xen/driver_util.h>
 #include <xen/xenbus.h>
@@ -67,7 +66,8 @@ typedef struct blkif_st {
        /* Physical parameters of the comms window. */
        unsigned int      irq;
        /* Comms information. */
-       blkif_back_ring_t blk_ring;
+       enum blkif_protocol blk_protocol;
+       blkif_back_rings_t blk_rings;
        struct vm_struct *blk_ring_area;
        /* The VBD attached to this interface. */
        struct vbd        vbd;
diff --git a/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c b/linux-2.6-xen-sparse/drivers/xen/blkback/interface.c
index 12552fb17a37e80b9c9216805a552e3464e9e5a4..7872a0f6ccaa89b5f9891047d1c91a028166ef67 100644
@@ -95,7 +95,6 @@ static void unmap_frontend_page(blkif_t *blkif)
 
 int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
 {
-       blkif_sring_t *sring;
        int err;
 
        /* Already connected through? */
@@ -111,8 +110,31 @@ int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
                return err;
        }
 
-       sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+       {
+               blkif_sring_t *sring;
+               sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_32:
+       {
+               blkif_x86_32_sring_t *sring_x86_32;
+               sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_64:
+       {
+               blkif_x86_64_sring_t *sring_x86_64;
+               sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               break;
+       }
+       default:
+               BUG();
+       }
 
        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
@@ -143,10 +165,10 @@ void blkif_disconnect(blkif_t *blkif)
                blkif->irq = 0;
        }
 
-       if (blkif->blk_ring.sring) {
+       if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
-               blkif->blk_ring.sring = NULL;
+               blkif->blk_rings.common.sring = NULL;
        }
 }
 
diff --git a/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c b/linux-2.6-xen-sparse/drivers/xen/blkback/xenbus.c
index b22712e80cbc33cc7c544ef164cfd78bdd591d9c..0d9b5e34b4684834dbe93d6eeca1ed2ea06cbb52 100644
@@ -459,6 +459,7 @@ static int connect_ring(struct backend_info *be)
        struct xenbus_device *dev = be->dev;
        unsigned long ring_ref;
        unsigned int evtchn;
+       char protocol[64] = "";
        int err;
 
        DPRINTK("%s", dev->otherend);
@@ -472,6 +473,24 @@ static int connect_ring(struct backend_info *be)
                return err;
        }
 
+       be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+                           "%63s", protocol, NULL);
+       if (err)
+               strcpy(protocol, "unspecified, assuming native");
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+       else {
+               xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+               return -1;
+       }
+       printk("blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
        /* Map the shared frame, irq etc. */
        err = blkif_map(be->blkif, ring_ref, evtchn);
        if (err) {
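
With the negotiation above, the ABI string the backend gathers lives in
the frontend's xenstore directory; an illustrative check for a 32-bit
frontend (hypothetical domid and device id):

    # xenstore-read /local/domain/12/device/vbd/768/protocol
    x86_32-abi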
diff --git a/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c b/linux-2.6-xen-sparse/drivers/xen/blktap/blktap.c
index b1819615137403882bb9025d3b317bb1e8b18b64..d9e759eaf5d75be31a3443480af7fcf51e91a0fc 100644
@@ -1091,15 +1091,15 @@ irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
 static int print_dbug = 1;
 static int do_block_io_op(blkif_t *blkif)
 {
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        blkif_request_t req;
        pending_req_t *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;
        tap_blkif_t *info;
 
-       rc = blk_ring->req_cons;
-       rp = blk_ring->sring->req_prod;
+       rc = blk_rings->common.req_cons;
+       rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */
 
        /*Check blkif has corresponding UE ring*/
@@ -1130,8 +1130,8 @@ static int do_block_io_op(blkif_t *blkif)
                        more_to_do = 1;
                        break;
                }
-               
-               if (RING_REQUEST_CONS_OVERFLOW(blk_ring, rc)) {
+
+               if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
                        WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
                               " More to do\n");
                        more_to_do = 1;
@@ -1145,8 +1145,21 @@ static int do_block_io_op(blkif_t *blkif)
                        break;
                }
 
-               memcpy(&req, RING_GET_REQUEST(blk_ring, rc), sizeof(req));
-               blk_ring->req_cons = ++rc; /* before make_response() */ 
+               switch (blkif->blk_protocol) {
+               case BLKIF_PROTOCOL_NATIVE:
+                       memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
+                              sizeof(req));
+                       break;
+               case BLKIF_PROTOCOL_X86_32:
+                       blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
+                       break;
+               case BLKIF_PROTOCOL_X86_64:
+                       blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
+                       break;
+               default:
+                       BUG();
+               }
+               blk_rings->common.req_cons = ++rc; /* before make_response() */
 
                switch (req.operation) {
                case BLKIF_OP_READ:
@@ -1222,7 +1235,7 @@ static void dispatch_rw_block_io(blkif_t *blkif,
                WPRINTK("blktap: fe_ring is full, can't add "
                        "IO Request will be dropped. %d %d\n",
                        RING_SIZE(&info->ufe_ring),
-                       RING_SIZE(&blkif->blk_ring));
+                       RING_SIZE(&blkif->blk_rings.common));
                goto fail_response;
        }
 
@@ -1410,32 +1423,51 @@ static void dispatch_rw_block_io(blkif_t *blkif,
 static void make_response(blkif_t *blkif, unsigned long id, 
                           unsigned short op, int st)
 {
-       blkif_response_t *resp;
+       blkif_response_t  resp;
        unsigned long     flags;
-       blkif_back_ring_t *blk_ring = &blkif->blk_ring;
+       blkif_back_rings_t *blk_rings = &blkif->blk_rings;
        int more_to_do = 0;
        int notify;
 
+       resp.id        = id;
+       resp.operation = op;
+       resp.status    = st;
+
        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
-       /* Place on the response ring for the relevant domain. */ 
-       resp = RING_GET_RESPONSE(blk_ring, blk_ring->rsp_prod_pvt);
-       resp->id        = id;
-       resp->operation = op;
-       resp->status    = st;
-       blk_ring->rsp_prod_pvt++;
-       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(blk_ring, notify);
-
-       if (blk_ring->rsp_prod_pvt == blk_ring->req_cons) {
+       /* Place on the response ring for the relevant domain. */
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+               memcpy(RING_GET_RESPONSE(&blk_rings->native,
+                                        blk_rings->native.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_32:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
+                                        blk_rings->x86_32.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       case BLKIF_PROTOCOL_X86_64:
+               memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
+                                        blk_rings->x86_64.rsp_prod_pvt),
+                      &resp, sizeof(resp));
+               break;
+       default:
+               BUG();
+       }
+       blk_rings->common.rsp_prod_pvt++;
+       RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
+
+       if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
                /*
                 * Tail check for pending requests. Allows frontend to avoid
                 * notifications if requests are already in flight (lower
                 * overheads and promotes batching).
                 */
-               RING_FINAL_CHECK_FOR_REQUESTS(blk_ring, more_to_do);
-       } else if (RING_HAS_UNCONSUMED_REQUESTS(blk_ring)) {
+               RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
+       } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
                more_to_do = 1;
+       }
 
-       }       
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (more_to_do)
                blkif_notify_work(blkif);
diff --git a/linux-2.6-xen-sparse/drivers/xen/blktap/common.h b/linux-2.6-xen-sparse/drivers/xen/blktap/common.h
index fc42c63508c8eb8813cd5242e780efd83d8d1bb4..019f7ba58b24d4dc9d27757eb7f978b6f2bc89c2 100644
@@ -39,8 +39,7 @@
 #include <asm/pgalloc.h>
 #include <xen/evtchn.h>
 #include <asm/hypervisor.h>
-#include <xen/interface/io/blkif.h>
-#include <xen/interface/io/ring.h>
+#include <xen/blkif.h>
 #include <xen/gnttab.h>
 #include <xen/driver_util.h>
 
@@ -58,7 +57,8 @@ typedef struct blkif_st {
        /* Physical parameters of the comms window. */
        unsigned int      irq;
        /* Comms information. */
-       blkif_back_ring_t blk_ring;
+       enum blkif_protocol blk_protocol;
+       blkif_back_rings_t blk_rings;
        struct vm_struct *blk_ring_area;
        /* Back pointer to the backend_info. */
        struct backend_info *be;
diff --git a/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c b/linux-2.6-xen-sparse/drivers/xen/blktap/interface.c
index 44f653ba516dbcfed8e5bfa648e7becb71ce2087..15812e8455f4de7e3ffd23c48f7ee2b84961f548 100644
@@ -96,7 +96,6 @@ static void unmap_frontend_page(blkif_t *blkif)
 int tap_blkif_map(blkif_t *blkif, unsigned long shared_page, 
                  unsigned int evtchn)
 {
-       blkif_sring_t *sring;
        int err;
 
        /* Already connected through? */
@@ -112,8 +111,31 @@ int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
                return err;
        }
 
-       sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
-       BACK_RING_INIT(&blkif->blk_ring, sring, PAGE_SIZE);
+       switch (blkif->blk_protocol) {
+       case BLKIF_PROTOCOL_NATIVE:
+       {
+               blkif_sring_t *sring;
+               sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_32:
+       {
+               blkif_x86_32_sring_t *sring_x86_32;
+               sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
+               break;
+       }
+       case BLKIF_PROTOCOL_X86_64:
+       {
+               blkif_x86_64_sring_t *sring_x86_64;
+               sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
+               BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
+               break;
+       }
+       default:
+               BUG();
+       }
 
        err = bind_interdomain_evtchn_to_irqhandler(
                blkif->domid, evtchn, tap_blkif_be_int,
@@ -134,10 +156,10 @@ void tap_blkif_unmap(blkif_t *blkif)
                unbind_from_irqhandler(blkif->irq, blkif);
                blkif->irq = 0;
        }
-       if (blkif->blk_ring.sring) {
+       if (blkif->blk_rings.common.sring) {
                unmap_frontend_page(blkif);
                free_vm_area(blkif->blk_ring_area);
-               blkif->blk_ring.sring = NULL;
+               blkif->blk_rings.common.sring = NULL;
        }
 }
 
diff --git a/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c b/linux-2.6-xen-sparse/drivers/xen/blktap/xenbus.c
index fb2979b70b1b662351b6c60403b02e4873ba23e0..58a79e7e4de32f0b1d2db18449448d8629477761 100644
@@ -340,6 +340,7 @@ static int connect_ring(struct backend_info *be)
        struct xenbus_device *dev = be->dev;
        unsigned long ring_ref;
        unsigned int evtchn;
+       char protocol[64];
        int err;
 
        DPRINTK("%s\n", dev->otherend);
@@ -353,6 +354,24 @@ static int connect_ring(struct backend_info *be)
                return err;
        }
 
+       be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
+                           "%63s", protocol, NULL);
+       if (err)
+               strcpy(protocol, "unspecified, assuming native");
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
+       else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
+               be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
+       else {
+               xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
+               return -1;
+       }
+       printk("blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
+              ring_ref, evtchn, be->blkif->blk_protocol, protocol);
+
        /* Map the shared frame, irq etc. */
        err = tap_blkif_map(be->blkif, ring_ref, evtchn);
        if (err) {
diff --git a/linux-2.6-xen-sparse/include/xen/blkif.h b/linux-2.6-xen-sparse/include/xen/blkif.h
new file mode 100644
index 0000000..4d6c663
--- /dev/null
+++ b/linux-2.6-xen-sparse/include/xen/blkif.h
@@ -0,0 +1,97 @@
+#ifndef __XEN_BLKIF_H__
+#define __XEN_BLKIF_H__
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/blkif.h>
+#include <xen/interface/io/protocols.h>
+
+/* Not a real protocol.  Used to generate ring structs which contain
+ * the elements common to all protocols only.  This way we get a
+ * compiler-checkable way to use common struct elements, so we can
+ * avoid using switch(protocol) in a number of places.  */
+struct blkif_common_request {
+       char dummy;
+};
+struct blkif_common_response {
+       char dummy;
+};
+
+/* i386 protocol version */
+#pragma pack(push, 4)
+struct blkif_x86_32_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       uint8_t        nr_segments;  /* number of segments                   */
+       blkif_vdev_t   handle;       /* only for read/write requests         */
+       uint64_t       id;           /* private guest value, echoed in resp  */
+       blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_32_response {
+       uint64_t        id;              /* copied from request */
+       uint8_t         operation;       /* copied from request */
+       int16_t         status;          /* BLKIF_RSP_???       */
+};
+typedef struct blkif_x86_32_request blkif_x86_32_request_t;
+typedef struct blkif_x86_32_response blkif_x86_32_response_t;
+#pragma pack(pop)
+
+/* x86_64 protocol version */
+struct blkif_x86_64_request {
+       uint8_t        operation;    /* BLKIF_OP_???                         */
+       uint8_t        nr_segments;  /* number of segments                   */
+       blkif_vdev_t   handle;       /* only for read/write requests         */
+       uint64_t       __attribute__((__aligned__(8))) id;
+       blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
+       struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+};
+struct blkif_x86_64_response {
+       uint64_t       __attribute__((__aligned__(8))) id;
+       uint8_t         operation;       /* copied from request */
+       int16_t         status;          /* BLKIF_RSP_???       */
+};
+typedef struct blkif_x86_64_request blkif_x86_64_request_t;
+typedef struct blkif_x86_64_response blkif_x86_64_response_t;
+
+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
+
+union blkif_back_rings {
+       blkif_back_ring_t        native;
+       blkif_common_back_ring_t common;
+       blkif_x86_32_back_ring_t x86_32;
+       blkif_x86_64_back_ring_t x86_64;
+};
+typedef union blkif_back_rings blkif_back_rings_t;
+
+enum blkif_protocol {
+       BLKIF_PROTOCOL_NATIVE = 1,
+       BLKIF_PROTOCOL_X86_32 = 2,
+       BLKIF_PROTOCOL_X86_64 = 3,
+};
+
+static void inline blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
+{
+       int i;
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       for (i = 0; i < src->nr_segments; i++)
+               dst->seg[i] = src->seg[i];
+}
+
+static void inline blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
+{
+       int i;
+       dst->operation = src->operation;
+       dst->nr_segments = src->nr_segments;
+       dst->handle = src->handle;
+       dst->id = src->id;
+       dst->sector_number = src->sector_number;
+       for (i = 0; i < src->nr_segments; i++)
+               dst->seg[i] = src->seg[i];
+}
+
+#endif /* __XEN_BLKIF_H__ */
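
The union above is only safe because every back ring generated by
DEFINE_RING_TYPES shares one layout (only the sring pointee type
differs).  A hedged compile-time check one could add, not in the patch
(BUILD_BUG_ON from linux/kernel.h, offsetof from linux/stddef.h):

    static inline void blkif_back_rings_layout_check(void)
    {
            BUILD_BUG_ON(offsetof(blkif_back_ring_t, req_cons) !=
                         offsetof(blkif_common_back_ring_t, req_cons));
            BUILD_BUG_ON(offsetof(blkif_back_ring_t, sring) !=
                         offsetof(blkif_x86_64_back_ring_t, sring));
            BUILD_BUG_ON(sizeof(blkif_back_ring_t) !=
                         sizeof(blkif_x86_32_back_ring_t));
    }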
diff --git a/xen/include/public/io/blkif.h b/xen/include/public/io/blkif.h
index 43fdf6ecbe25ab6e0f2b484fde9a9cd1850e1d87..548ba95299a55d6143ab69e795d322c7197ca416 100644
  */
 #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
 
+struct blkif_request_segment {
+    grant_ref_t gref;        /* reference to I/O buffer frame        */
+    /* @first_sect: first sector in frame to transfer (inclusive).   */
+    /* @last_sect: last sector in frame to transfer (inclusive).     */
+    uint8_t     first_sect, last_sect;
+};
+
 struct blkif_request {
     uint8_t        operation;    /* BLKIF_OP_???                         */
     uint8_t        nr_segments;  /* number of segments                   */
     blkif_vdev_t   handle;       /* only for read/write requests         */
     uint64_t       id;           /* private guest value, echoed in resp  */
     blkif_sector_t sector_number;/* start sector idx on disk (r/w only)  */
-    struct blkif_request_segment {
-        grant_ref_t gref;        /* reference to I/O buffer frame        */
-        /* @first_sect: first sector in frame to transfer (inclusive).   */
-        /* @last_sect: last sector in frame to transfer (inclusive).     */
-        uint8_t     first_sect, last_sect;
-    } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+    struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
 };
 typedef struct blkif_request blkif_request_t;